Rename tasklist_lock -> domlist_lock.

domlist_lock and heap_lock no longer disable interrupts, so
find_domain_by_id(), put_domain(), alloc_domheap_pages() and friends
are no longer interrupt-safe. alloc_xenheap_pages(), xmalloc() and
friends *are* still safe to use in interrupt context. Also introduce
a global dom0 pointer, which removes many find_domain_by_id(0) /
put_domain() pairs.
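To make the new discipline concrete, here is a minimal sketch (illustrative
fragments, not the literal patched code) of the two regimes this change
establishes. heap_lock is now taken with a plain spin_lock(), so an
interrupt handler that re-entered the allocator on a CPU already holding
the lock would spin forever. The xenheap wrappers avoid this by masking
interrupts locally; the domheap and domain-list paths instead forbid
interrupt context outright.

    /* IRQ-safe regime (xenheap): mask IRQs around the allocation so an
     * interrupt cannot interleave with heap_lock on this CPU. */
    unsigned long flags;
    local_irq_save(flags);
    pg = alloc_heap_pages(MEMZONE_XEN, order);  /* takes heap_lock inside */
    local_irq_restore(flags);

    /* Non-IRQ-safe regime (domheap, domain list): take the lock plainly
     * and assert the caller is never in interrupt context. */
    ASSERT(!in_irq());
    read_lock(&domlist_lock);
    for_each_domain ( d )
        cnt[d->processor]++;                    /* as in the hunk below */
    read_unlock(&domlist_lock);

The trade-off: not disabling interrupts makes the common lock paths
cheaper, at the cost of restricting which contexts may take them.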
{
#ifdef PDB_PAST
struct domain *p;
- u_long flags;
#endif /* PDB_PAST */
int buf_idx = 0;
{
int count = 0;
- read_lock_irqsave (&tasklist_lock, flags);
+ read_lock(&domlist_lock);
pdb_out_buffer[buf_idx++] = 'm';
for_each_domain ( p )
}
pdb_out_buffer[buf_idx++] = 0;
- read_unlock_irqrestore(&tasklist_lock, flags);
+ read_unlock(&domlist_lock);
break;
}
case PDB_LVL_GUESTOS: /* return a list of processes */
char message[16];
struct domain *p;
- p = find_domain_by_id(pdb_ctx[pdb_level].info);
- strncpy (message, p->name, 16);
- put_domain(p);
+ strncpy (message, dom0->name, 16);
ptr += 16;
if (hexToInt (&ptr, &thread))
}
else
{
- struct domain *p = find_domain_by_id(0);
printk ("pdb error: cr3: 0x%lx dom0cr3: 0x%lx\n", cr3,
- p->mm.shadow_mode ? pagetable_val(p->mm.shadow_table)
- : pagetable_val(p->mm.pagetable));
- put_domain(p);
+ dom0->mm.shadow_mode ? pagetable_val(dom0->mm.shadow_table)
+ : pagetable_val(dom0->mm.pagetable));
printk ("pdb error: L2:0x%p (0x%lx)\n",
l2_table, l2_pgentry_val(*l2_table));
}
unsigned long nmi_softirq_reason;
static void nmi_softirq(void)
{
- struct domain *d = find_domain_by_id(0);
-
- if ( d == NULL )
+ if ( dom0 == NULL )
return;
if ( test_and_clear_bit(0, &nmi_softirq_reason) )
- send_guest_virq(d, VIRQ_PARITY_ERR);
+ send_guest_virq(dom0, VIRQ_PARITY_ERR);
if ( test_and_clear_bit(1, &nmi_softirq_reason) )
- send_guest_virq(d, VIRQ_IO_ERR);
-
- put_domain(d);
+ send_guest_virq(dom0, VIRQ_IO_ERR);
}
asmlinkage void math_state_restore(struct pt_regs *regs, long error_code)
#include <xen/perfc.h>
#include <asm/domain_page.h>
#include <asm/flushtlb.h>
+#include <asm/hardirq.h>
unsigned long *mapcache;
static unsigned int map_idx, epoch, shadow_epoch[NR_CPUS];
unsigned long va;
unsigned int idx, cpu = smp_processor_id();
unsigned long *cache = mapcache;
- unsigned long flags;
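+ /* map_lock no longer masks IRQs, so mapping must never happen in interrupt context. */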
+ ASSERT(!in_irq());
perfc_incrc(map_domain_mem_count);
- spin_lock_irqsave(&map_lock, flags);
+ spin_lock(&map_lock);
/* Has some other CPU caused a wrap? We must flush if so. */
if ( epoch != shadow_epoch[cpu] )
cache[idx] = (pa & PAGE_MASK) | __PAGE_HYPERVISOR;
- spin_unlock_irqrestore(&map_lock, flags);
+ spin_unlock(&map_lock);
va = MAPCACHE_VIRT_START + (idx << PAGE_SHIFT) + (pa & ~PAGE_MASK);
return (void *)va;
struct domain *d;
unsigned int i, cnt[NR_CPUS] = { 0 };
- read_lock_irq(&tasklist_lock);
+ read_lock(&domlist_lock);
for_each_domain ( d )
cnt[d->processor]++;
- read_unlock_irq(&tasklist_lock);
+ read_unlock(&domlist_lock);
for ( i = 0; i < smp_num_cpus; i++ )
if ( cnt[i] < cnt[pro] )
{
full_execution_context_t *c;
struct domain *d;
- unsigned long flags;
- read_lock_irqsave(&tasklist_lock, flags);
+ read_lock(&domlist_lock);
for_each_domain ( d )
{
if ( (d == NULL) || !get_domain(d) )
{
- read_unlock_irqrestore(&tasklist_lock, flags);
+ read_unlock(&domlist_lock);
ret = -ESRCH;
break;
}
- read_unlock_irqrestore(&tasklist_lock, flags);
+ read_unlock(&domlist_lock);
op->u.getdomaininfo.domain = d->id;
#include <public/dom0_ops.h>
#include <asm/domain_page.h>
-/* Both these structures are protected by the tasklist_lock. */
-rwlock_t tasklist_lock __cacheline_aligned = RW_LOCK_UNLOCKED;
-struct domain *task_hash[TASK_HASH_SIZE];
-struct domain *task_list;
+/* Both these structures are protected by the domlist_lock. */
+rwlock_t domlist_lock __cacheline_aligned = RW_LOCK_UNLOCKED;
+struct domain *domain_hash[DOMAIN_HASH_SIZE];
+struct domain *domain_list;
struct domain *do_createdomain(domid_t dom_id, unsigned int cpu)
{
struct domain *d, **pd;
- unsigned long flags;
if ( (d = alloc_domain_struct()) == NULL )
return NULL;
sched_add_domain(d);
- write_lock_irqsave(&tasklist_lock, flags);
- pd = &task_list; /* NB. task_list is maintained in order of dom_id. */
- for ( pd = &task_list; *pd != NULL; pd = &(*pd)->next_list )
+ write_lock(&domlist_lock);
+ pd = &domain_list; /* NB. domain_list maintained in order of dom_id. */
+ for ( pd = &domain_list; *pd != NULL; pd = &(*pd)->next_list )
if ( (*pd)->id > d->id )
break;
d->next_list = *pd;
*pd = d;
- d->next_hash = task_hash[TASK_HASH(dom_id)];
- task_hash[TASK_HASH(dom_id)] = d;
- write_unlock_irqrestore(&tasklist_lock, flags);
+ d->next_hash = domain_hash[DOMAIN_HASH(dom_id)];
+ domain_hash[DOMAIN_HASH(dom_id)] = d;
+ write_unlock(&domlist_lock);
}
else
{
struct domain *find_domain_by_id(domid_t dom)
{
struct domain *d;
- unsigned long flags;
- read_lock_irqsave(&tasklist_lock, flags);
- d = task_hash[TASK_HASH(dom)];
+ read_lock(&domlist_lock);
+ d = domain_hash[DOMAIN_HASH(dom)];
while ( d != NULL )
{
if ( d->id == dom )
}
d = d->next_hash;
}
- read_unlock_irqrestore(&tasklist_lock, flags);
+ read_unlock(&domlist_lock);
return d;
}
struct domain *find_last_domain(void)
{
struct domain *d, *dlast;
- unsigned long flags;
- read_lock_irqsave(&tasklist_lock, flags);
- dlast = task_list;
+ read_lock(&domlist_lock);
+ dlast = domain_list;
d = dlast->next_list;
while ( d != NULL )
{
}
if ( !get_domain(dlast) )
dlast = NULL;
- read_unlock_irqrestore(&tasklist_lock, flags);
+ read_unlock(&domlist_lock);
return dlast;
}
void domain_crash(void)
{
- struct domain *d;
-
if ( current->id == 0 )
BUG();
set_bit(DF_CRASHED, &current->flags);
- d = find_domain_by_id(0);
- send_guest_virq(d, VIRQ_DOM_EXC);
- put_domain(d);
+ send_guest_virq(dom0, VIRQ_DOM_EXC);
__enter_scheduler();
BUG();
void domain_shutdown(u8 reason)
{
- struct domain *d;
-
if ( current->id == 0 )
{
extern void machine_restart(char *);
current->shutdown_code = reason;
set_bit(DF_SHUTDOWN, &current->flags);
- d = find_domain_by_id(0);
- send_guest_virq(d, VIRQ_DOM_EXC);
- put_domain(d);
+ send_guest_virq(dom0, VIRQ_DOM_EXC);
__enter_scheduler();
}
void domain_destruct(struct domain *d)
{
struct domain **pd;
- unsigned long flags;
atomic_t old, new;
if ( !test_bit(DF_DYING, &d->flags) )
return;
/* Delete from task list and task hashtable. */
- write_lock_irqsave(&tasklist_lock, flags);
- pd = &task_list;
+ write_lock(&domlist_lock);
+ pd = &domain_list;
while ( *pd != d )
pd = &(*pd)->next_list;
*pd = d->next_list;
- pd = &task_hash[TASK_HASH(d->id)];
+ pd = &domain_hash[DOMAIN_HASH(d->id)];
while ( *pd != d )
pd = &(*pd)->next_hash;
*pd = d->next_hash;
- write_unlock_irqrestore(&tasklist_lock, flags);
+ write_unlock(&domlist_lock);
destroy_event_channels(d);
grant_table_destroy(d);
unsigned long xenheap_phys_end;
xmem_cache_t *domain_struct_cachep;
+struct domain *dom0;
vm_assist_info_t vm_assist_info[MAX_VMASST_TYPE + 1];
void cmain(multiboot_info_t *mbi)
{
- struct domain *new_dom;
unsigned long max_page;
unsigned char *cmdline;
module_t *mod = (module_t *)__va(mbi->mods_addr);
grant_table_init();
/* Create initial domain 0. */
- new_dom = do_createdomain(0, 0);
- if ( new_dom == NULL )
+ dom0 = do_createdomain(0, 0);
+ if ( dom0 == NULL )
panic("Error creating domain 0\n");
- set_bit(DF_PRIVILEGED, &new_dom->flags);
+ set_bit(DF_PRIVILEGED, &dom0->flags);
shadow_mode_init();
* We're going to setup domain0 using the module(s) that we stashed safely
* above our heap. The second module, if present, is an initrd ramdisk.
*/
- if ( construct_dom0(new_dom, dom0_memory_start, dom0_memory_end,
+ if ( construct_dom0(dom0, dom0_memory_start, dom0_memory_end,
(char *)initial_images_start,
mod[0].mod_end-mod[0].mod_start,
(mbi->mods_count == 1) ? 0 :
init_trace_bufs();
domain_unpause_by_systemcontroller(current);
- domain_unpause_by_systemcontroller(new_dom);
+ domain_unpause_by_systemcontroller(dom0);
startup_cpu_idle_loop();
}
void do_task_queues(unsigned char key)
{
- unsigned long flags;
struct domain *d;
s_time_t now = NOW();
struct list_head *ent;
printk("'%c' pressed -> dumping task queues (now=0x%X:%08X)\n", key,
(u32)(now>>32), (u32)now);
- read_lock_irqsave(&tasklist_lock, flags);
+ read_lock(&domlist_lock);
for_each_domain ( d )
{
send_guest_virq(d, VIRQ_DEBUG);
}
- read_unlock_irqrestore(&tasklist_lock, flags);
+ read_unlock(&domlist_lock);
}
extern void dump_runq(unsigned char key);
#define allocated_in_map(_pn) \
(alloc_bitmap[(_pn)/PAGES_PER_MAPWORD] & (1<<((_pn)&(PAGES_PER_MAPWORD-1))))
-
/*
* Hint regarding bitwise arithmetic in map_{alloc,free}:
* -(1<<n) sets all bits >= n.
static spinlock_t heap_lock = SPIN_LOCK_UNLOCKED;
-
/* Initialise allocator to handle up to @max_pages. */
unsigned long init_heap_allocator(
unsigned long bitmap_start, unsigned long max_pages)
return bitmap_start + bitmap_size;
}
+
/* Hand the specified arbitrary page range to the specified heap zone. */
void init_heap_pages(int zone, struct pfn_info *pg, unsigned long nr_pages)
{
{
int i;
struct pfn_info *pg;
- unsigned long flags;
if ( unlikely(order < MIN_ORDER) || unlikely(order > MAX_ORDER) )
return NULL;
- spin_lock_irqsave(&heap_lock, flags);
+ spin_lock(&heap_lock);
/* Find smallest order which can satisfy the request. */
for ( i = order; i < NR_ORDERS; i++ )
map_alloc(page_to_pfn(pg), 1 << order);
avail[zone] -= 1 << order;
- spin_unlock_irqrestore(&heap_lock, flags);
+ spin_unlock(&heap_lock);
return pg;
no_memory:
- spin_unlock_irqrestore(&heap_lock, flags);
+ spin_unlock(&heap_lock);
return NULL;
}
void free_heap_pages(int zone, struct pfn_info *pg, int order)
{
unsigned long mask;
- unsigned long flags;
- spin_lock_irqsave(&heap_lock, flags);
+ spin_lock(&heap_lock);
map_free(page_to_pfn(pg), 1 << order);
avail[zone] += 1 << order;
PFN_ORDER(pg) = order;
list_add_tail(&pg->list, &heap[zone][order]);
- spin_unlock_irqrestore(&heap_lock, flags);
+ spin_unlock(&heap_lock);
}
void init_xenheap_pages(unsigned long ps, unsigned long pe)
{
+ unsigned long flags;
+
ps = round_pgup(ps);
pe = round_pgdown(pe);
+
memguard_guard_range(__va(ps), pe - ps);
+
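+ /* heap_lock no longer masks IRQs; disable them here so xenheap paths stay IRQ-safe. */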
+ local_irq_save(flags);
init_heap_pages(MEMZONE_XEN, phys_to_page(ps), (pe - ps) >> PAGE_SHIFT);
+ local_irq_restore(flags);
}
+
unsigned long alloc_xenheap_pages(int order)
{
+ unsigned long flags;
struct pfn_info *pg;
int i, attempts = 0;
retry:
- if ( unlikely((pg = alloc_heap_pages(MEMZONE_XEN, order)) == NULL) )
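+ /* Mask IRQs around heap_lock: this keeps alloc_xenheap_pages() callable from IRQ context. */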
+ local_irq_save(flags);
+ pg = alloc_heap_pages(MEMZONE_XEN, order);
+ local_irq_restore(flags);
+
+ if ( unlikely(pg == NULL) )
goto no_memory;
memguard_unguard_range(page_to_virt(pg), 1 << (order + PAGE_SHIFT));
return 0;
}
+
void free_xenheap_pages(unsigned long p, int order)
{
+ unsigned long flags;
+
memguard_guard_range((void *)p, 1 << (order + PAGE_SHIFT));
+
+ local_irq_save(flags);
free_heap_pages(MEMZONE_XEN, virt_to_page(p), order);
+ local_irq_restore(flags);
}
void init_domheap_pages(unsigned long ps, unsigned long pe)
{
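+ /* The domheap takes heap_lock without masking IRQs, so it is not interrupt-safe. */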
+ ASSERT(!in_irq());
+
ps = round_pgup(ps);
pe = round_pgdown(pe);
+
init_heap_pages(MEMZONE_DOM, phys_to_page(ps), (pe - ps) >> PAGE_SHIFT);
}
+
struct pfn_info *alloc_domheap_pages(struct domain *d, int order)
{
struct pfn_info *pg;
return pg;
}
+
void free_domheap_pages(struct pfn_info *pg, int order)
{
int i, drop_dom_ref;
struct domain *d = pg->u.inuse.domain;
void *p;
+ ASSERT(!in_irq());
+
if ( unlikely(IS_XEN_HEAP_FRAME(pg)) )
{
/* NB. May recursively lock from domain_relinquish_memory(). */
put_domain(d);
}
+
unsigned long avail_domheap_pages(void)
{
return avail[MEMZONE_DOM];
}
-
{
ASSERT(!local_irq_is_enabled());
- write_lock(&tasklist_lock);
+ write_lock(&domlist_lock);
for_each_domain ( p )
{
}
}
- write_unlock(&tasklist_lock);
+ write_unlock(&domlist_lock);
CPU_SVT(cpu) -= 0xe0000000;
}
unsigned long nr_pages;
char *rawbuf;
struct t_buf *buf;
- struct domain *dom0;
if ( opt_tbuf_size == 0 )
{
/* Share pages so that xentrace can map them. */
- dom0 = find_domain_by_id(0);
-
- for( i = 0; i < nr_pages; i++)
+ for ( i = 0; i < nr_pages; i++ )
SHARE_PFN_WITH_DOMAIN(virt_to_page(rawbuf+(i*PAGE_SIZE)), dom0);
- put_domain(dom0);
-
for ( i = 0; i < smp_num_cpus; i++ )
{
buf = t_bufs[i] = (struct t_buf *)&rawbuf[i*opt_tbuf_size*PAGE_SIZE];
static void __serial_rx(unsigned char c, struct pt_regs *regs)
{
- struct domain *d;
-
if ( xen_rx )
{
handle_keypress(c);
{
serial_rx_ring[SERIAL_RX_MASK(serial_rx_prod)] = c;
if ( serial_rx_prod++ == serial_rx_cons )
- {
- d = find_domain_by_id(0); /* only DOM0 reads the serial buffer */
- send_guest_virq(d, VIRQ_CONSOLE);
- put_domain(d);
- }
+ send_guest_virq(dom0, VIRQ_CONSOLE);
}
}
struct domain;
struct pfn_info;
-/* Generic allocator */
+/* Generic allocator. These functions are *not* interrupt-safe. */
unsigned long init_heap_allocator(
unsigned long bitmap_start, unsigned long max_pages);
void init_heap_pages(int zone, struct pfn_info *pg, unsigned long nr_pages);
void free_heap_pages(int zone, struct pfn_info *pg, int order);
void scrub_heap_pages(void);
-/* Xen suballocator */
+/* Xen suballocator. These functions are interrupt-safe. */
void init_xenheap_pages(unsigned long ps, unsigned long pe);
unsigned long alloc_xenheap_pages(int order);
void free_xenheap_pages(unsigned long p, int order);
#define alloc_xenheap_page() (alloc_xenheap_pages(0))
#define free_xenheap_page(_p) (free_xenheap_pages(_p,0))
-/* Domain suballocator */
+/* Domain suballocator. These functions are *not* interrupt-safe. */
void init_domheap_pages(unsigned long ps, unsigned long pe);
struct pfn_info *alloc_domheap_pages(struct domain *d, int order);
void free_domheap_pages(struct pfn_info *pg, int order);
#include <xen/grant_table.h>
extern unsigned long volatile jiffies;
-extern rwlock_t tasklist_lock;
+extern rwlock_t domlist_lock;
struct domain;
+/* A global pointer to the initial domain (DOM0). */
+extern struct domain *dom0;
+
typedef struct event_channel_st
{
#define ECS_FREE 0 /* Channel is available for use. */
void continue_nonidle_task(void);
-/* This task_hash and task_list are protected by the tasklist_lock. */
-#define TASK_HASH_SIZE 256
-#define TASK_HASH(_id) ((int)(_id)&(TASK_HASH_SIZE-1))
-extern struct domain *task_hash[TASK_HASH_SIZE];
-extern struct domain *task_list;
+/* This domain_hash and domain_list are protected by the domlist_lock. */
+#define DOMAIN_HASH_SIZE 256
+#define DOMAIN_HASH(_id) ((int)(_id)&(DOMAIN_HASH_SIZE-1))
+extern struct domain *domain_hash[DOMAIN_HASH_SIZE];
+extern struct domain *domain_list;
#define for_each_domain(_p) \
- for ( (_p) = task_list; (_p) != NULL; (_p) = (_p)->next_list )
+ for ( (_p) = domain_list; (_p) != NULL; (_p) = (_p)->next_list )
#define DF_DONEFPUINIT 0 /* Has the FPU been initialised for this task? */
#define DF_USEDFPU 1 /* Has this task used the FPU since last save? */
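For reference, the second thread of this patch is the new global dom0
pointer. Code that previously did a hash lookup plus a refcount dance
just to reach domain 0 can now use the pointer directly; a before/after
sketch based on the domain_crash() hunk above:

    /* Before: look domain 0 up by id, use it, drop the reference. */
    struct domain *d = find_domain_by_id(0);
    send_guest_virq(d, VIRQ_DOM_EXC);
    put_domain(d);

    /* After: dom0 is set once in cmain() and never destroyed, so no
     * lookup, no refcount, and no domlist_lock are needed. */
    send_guest_virq(dom0, VIRQ_DOM_EXC);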